In [1]:
from keras.datasets import mnist
Loading the MNIST training and test datasets.
In [2]:
(train_images, train_labels),(test_images, test_labels) = mnist.load_data()
In [3]:
train_images.shape
Out[3]: (60000, 28, 28)
In [4]:
train_labels.shape
Out[4]: (60000,)
In [5]:
train_labels
Out[5]:
In [6]:
test_images.shape
Out[6]: (10000, 28, 28)
In [7]:
test_labels.shape
Out[7]: (10000,)
In [8]:
test_labels
Out[8]:
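Before preprocessing, it can help to confirm the raw pixel format; a minimal check, using the arrays loaded above (MNIST pixels are stored as uint8 values in the range 0-255):

# Raw images are 28x28 uint8 arrays with pixel values in 0-255
print(train_images.dtype)                       # uint8
print(train_images.min(), train_images.max())   # 0 255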
In [9]:
# Importing the models module, which provides the Sequential and functional Model APIs
from keras import models
In [10]:
# Importing the layers module, which provides layer types such as Dense
from keras import layers
In [11]:
# Initializing a sequential network
network = models.Sequential()
In [12]:
# Adding the first (hidden) Dense layer; input_shape declares the flattened 784-pixel input
network.add(layers.Dense(units=512,
                         activation='relu',
                         input_shape=(28 * 28,)))
In [13]:
# Adding the output layer: 10 units with softmax, one per digit class
network.add(layers.Dense(units=10,
                         activation='softmax'))
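Equivalently, the same two-layer network could be built in a single call by passing a list of layers to the Sequential constructor; a sketch for comparison only (the cell-by-cell model above is the one that gets trained):

# Equivalent one-shot construction (illustrative sketch, not used below)
alt_network = models.Sequential([
    layers.Dense(units=512, activation='relu', input_shape=(28 * 28,)),
    layers.Dense(units=10, activation='softmax'),
])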
In [14]:
# Compiling the network
network.compile(optimizer='rmsprop',
                loss='categorical_crossentropy',
                metrics=['accuracy'])
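A quick optional check of the architecture: summary() prints each layer's output shape and parameter count (784 * 512 + 512 = 401,920 weights in the hidden layer and 512 * 10 + 10 = 5,130 in the output layer).

# Inspect layer output shapes and parameter counts
network.summary()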
In [15]:
# Reshaping the training data from a 3-D array of shape (60000, 28, 28) to a 2-D array of shape (60000, 784)
train_images = train_images.reshape((60000, 28 * 28))
In [16]:
# Converting the data to float32
train_images = train_images.astype('float32')
In [17]:
# Rescaling pixel values from the 0-255 range to the 0-1 range
train_images = train_images / 255.0
In [18]:
test_images = test_images.reshape((10000, 28 * 28))
test_images = test_images.astype('float32')
test_images = test_images / 255.0
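After reshaping and rescaling, each image is a flat vector of 784 float32 values in [0, 1]; a minimal sanity check:

# Verify the preprocessed shapes, dtype, and value range
assert train_images.shape == (60000, 784)
assert test_images.shape == (10000, 784)
assert train_images.dtype == 'float32'
assert train_images.min() >= 0.0 and train_images.max() <= 1.0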
In [19]:
# Importing the one-hot encoding utility to_categorical from keras.utils
from keras.utils import to_categorical
In [20]:
# One-hot encoding the labels
train_labels = to_categorical(train_labels)
test_labels = to_categorical(test_labels)
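To see what to_categorical does, here is a tiny standalone example (the values are illustrative only): each integer class index becomes a binary indicator vector.

# One-hot encoding turns integer class indices into indicator vectors
to_categorical([0, 2, 1], num_classes=3)
# -> array([[1., 0., 0.],
#           [0., 0., 1.],
#           [0., 1., 0.]], dtype=float32)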
In [21]:
network.fit(train_images,
            train_labels,
            epochs=5,
            batch_size=128)
Out[21]:
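fit() also returns a History object whose history dict records the per-epoch loss and accuracy; a hedged sketch (the accuracy key is 'acc' in older Keras releases and 'accuracy' in newer ones, and re-running fit would train for another 5 epochs):

# Capturing the training history (sketch)
history = network.fit(train_images, train_labels, epochs=5, batch_size=128)
print(history.history.keys())   # e.g. dict_keys(['loss', 'acc']) or dict_keys(['loss', 'accuracy'])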
In [22]:
test_loss, test_acc = network.evaluate(test_images, test_labels)
In [23]:
print('Test acc:', test_acc)
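To get predictions for individual digits, predict() returns a vector of 10 class probabilities per image; a minimal sketch, assuming NumPy is imported as np:

import numpy as np

# Predict class probabilities for the first test image and pick the most likely digit
probs = network.predict(test_images[:1])   # shape (1, 10)
predicted_digit = np.argmax(probs, axis=-1)[0]
true_digit = np.argmax(test_labels[0])     # labels were one-hot encoded above
print('Predicted:', predicted_digit, 'True:', true_digit)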